import os
import torch
import torchvision
import pandas as pd
import torch.nn as nn
import torch.nn.functional as F
from tqdm.notebook import tqdm
import torchvision.models as models
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from torch.utils.data import random_split
from torchvision.utils import make_grid
import torchvision.transforms as transforms
from torchvision.datasets.folder import default_loader
import matplotlib.pyplot as plt
%matplotlib inline
!pip install opendatasets --upgrade --quiet # Needed to upload dataset directly from Kaggle to Colab without storing on PC
import opendatasets as od
# Kaggle dataset: ~8.7k paintings by 50 well-known artists (used as GAN training data).
dataset_url='https://www.kaggle.com/ikarus777/best-artworks-of-all-time'
od.download(dataset_url)
0%| | 0.00/2.29G [00:00<?, ?B/s]
Downloading best-artworks-of-all-time.zip to ./best-artworks-of-all-time
100%|██████████| 2.29G/2.29G [00:30<00:00, 81.6MB/s]
# Listing all artists in dataset
artists = pd.read_csv("best-artworks-of-all-time/artists.csv")
for artist_name in artists['name']:
    print(artist_name, end=" , ")
Amedeo Modigliani , Vasiliy Kandinskiy , Diego Rivera , Claude Monet , Rene Magritte , Salvador Dali , Edouard Manet , Andrei Rublev , Vincent van Gogh , Gustav Klimt , Hieronymus Bosch , Kazimir Malevich , Mikhail Vrubel , Pablo Picasso , Peter Paul Rubens , Pierre-Auguste Renoir , Francisco Goya , Frida Kahlo , El Greco , Albrecht Dürer , Alfred Sisley , Pieter Bruegel , Marc Chagall , Giotto di Bondone , Sandro Botticelli , Caravaggio , Leonardo da Vinci , Diego Velazquez , Henri Matisse , Jan van Eyck , Edgar Degas , Rembrandt , Titian , Henri de Toulouse-Lautrec , Gustave Courbet , Camille Pissarro , William Turner , Edvard Munch , Paul Cezanne , Eugene Delacroix , Henri Rousseau , Georges Seurat , Paul Klee , Piet Mondrian , Joan Miro , Andy Warhol , Paul Gauguin , Raphael , Michelangelo , Jackson Pollock ,
# Fixing constant size for all images
batch_size = 128
image_size = (64, 64)
# Per-channel (mean, std) used to normalize images into [-1, 1],
# matching the Tanh output range of the generator defined below.
stats = (0.5, 0.5, 0.5), (0.5, 0.5, 0.5)
transform_ds = transforms.Compose([
    transforms.Resize(image_size),
    # transforms.RandomCrop(32, padding=2),
    # transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize(*stats),
])
train_ds = torchvision.datasets.ImageFolder(root="best-artworks-of-all-time/resized",
                                            transform=transform_ds)
# num_workers=2: the runtime warned that 3 workers exceed the 2 CPUs available,
# which can slow down or even freeze the DataLoader.
train_dl = DataLoader(train_ds, batch_size, shuffle=True, num_workers=2, pin_memory=True)
print(len(train_ds))
8683
/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:481: UserWarning: This DataLoader will create 3 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary. cpuset_checked))
# Checking out the ready artworks after transformations
images, _ = train_ds[874]
print(images.size())
# Undo the (-1, 1) normalization (std=0.5, mean=0.5) before display: imshow
# expects floats in [0, 1] and was previously clipping the negative half.
plt.imshow(images.permute(1, 2, 0) * 0.5 + 0.5)
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
torch.Size([3, 64, 64])
<matplotlib.image.AxesImage at 0x7ff097355490>
images, _ = train_ds[5648]
# Denormalize back to [0, 1] so imshow doesn't clip the negative half of the range.
plt.imshow(images.permute(1, 2, 0) * 0.5 + 0.5)
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
<matplotlib.image.AxesImage at 0x7ff0971944d0>
images, _ = train_ds[7120]
# Denormalize back to [0, 1] so imshow doesn't clip the negative half of the range.
plt.imshow(images.permute(1, 2, 0) * 0.5 + 0.5)
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
<matplotlib.image.AxesImage at 0x7ff09705c8d0>
def denorm(img_tensors):
    """Reverse transforms.Normalize(*stats): map [-1, 1] tensors back to [0, 1]."""
    mean = stats[0][0]
    std = stats[1][0]
    return img_tensors * std + mean
def show_images(images, nmax=64):
    """Render up to `nmax` images from a batch as an 8-wide grid."""
    grid = make_grid(denorm(images.detach()[:nmax]), nrow=8)
    fig, ax = plt.subplots(figsize=(8, 8))
    ax.set_xticks([])
    ax.set_yticks([])
    ax.imshow(grid.permute(1, 2, 0))
def show_batch(dl, nmax=64):
    """Display the first batch of images from `dl`, then stop."""
    for batch_images, _ in dl:
        show_images(batch_images, nmax)
        break
show_batch(train_dl) # One batch of artworks in training dataset
/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:481: UserWarning: This DataLoader will create 3 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary. cpuset_checked))
def get_default_device():
    """Return the CUDA device when one is available, otherwise the CPU."""
    return torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def to_device(data, device):
    """Recursively move tensor(s) to `device`; lists/tuples come back as lists."""
    if isinstance(data, (list, tuple)):
        return [to_device(item, device) for item in data]
    return data.to(device, non_blocking=True)
class DeviceDataLoader():
    """Wrap a dataloader so every batch it yields is moved to `device` first."""

    def __init__(self, dl, device):
        self.dl = dl
        self.device = device

    def __iter__(self):
        # Yield batches only after relocating them to the target device.
        for batch in self.dl:
            yield to_device(batch, self.device)

    def __len__(self):
        # Number of batches in the wrapped dataloader.
        return len(self.dl)
# Pick the training device once and reuse it everywhere below.
device = get_default_device()
device  # notebook display of the selected device
device(type='cuda')
# Wrap the dataloader so batches are moved to the chosen device automatically.
train_dl = DeviceDataLoader(train_dl, device)
#DISCRIMINATOR
# DCGAN-style conv stack: 3x64x64 image -> single real/fake probability.
# Each downsampling stage halves the spatial size and doubles the channels:
# 64x64 -> 32x32 -> 16x16 -> 8x8 -> 4x4.
_disc_layers = []
for in_ch, out_ch in [(3, 64), (64, 128), (128, 256), (256, 512)]:
    _disc_layers += [
        nn.Conv2d(in_ch, out_ch, kernel_size=4, stride=2, padding=1, bias=False),
        nn.BatchNorm2d(out_ch),
        nn.LeakyReLU(0.2, inplace=True),
    ]
# Final 4x4 conv collapses the 512x4x4 map to one value; Sigmoid makes it a probability.
_disc_layers += [
    nn.Conv2d(512, 1, kernel_size=4, stride=1, padding=0, bias=False),
    nn.Flatten(),
    nn.Sigmoid(),
]
discriminator = nn.Sequential(*_disc_layers)
discriminator = to_device(discriminator, device)
latent_size = 150
#GENERATOR
# Mirror of the discriminator: latent vector (latent_size x 1 x 1) is
# upsampled to a 3x64x64 image; Tanh keeps outputs in [-1, 1] to match
# the Normalize(*stats) range of the training images.
def _gen_stage(in_ch, out_ch, stride, padding):
    # One transposed-conv upsampling stage followed by BatchNorm + ReLU.
    return [
        nn.ConvTranspose2d(in_ch, out_ch, kernel_size=4, stride=stride,
                           padding=padding, bias=False),
        nn.BatchNorm2d(out_ch),
        nn.ReLU(True),
    ]

generator = nn.Sequential(
    *_gen_stage(latent_size, 512, 1, 0),  # -> 512 x 4 x 4
    *_gen_stage(512, 256, 2, 1),          # -> 256 x 8 x 8
    *_gen_stage(256, 128, 2, 1),          # -> 128 x 16 x 16
    *_gen_stage(128, 64, 2, 1),           # -> 64 x 32 x 32
    nn.ConvTranspose2d(64, 3, kernel_size=4, stride=2, padding=1, bias=False),
    nn.Tanh()
    # out: 3 x 64 x 64
)
# Fake batch of artwork for training
# Sanity check: the (still untrained, CPU-resident) generator should map
# latent noise to a batch of 3x64x64 images.
xb = torch.randn(batch_size, latent_size, 1, 1) # random latent tensors
fake_images = generator(xb)
print(fake_images.shape)
show_images(fake_images)
torch.Size([128, 3, 64, 64])
generator = to_device(generator, device)  # move the generator to the training device too
def train_discriminator(real_images, opt_d):
    """One discriminator step: push real images toward 1 and generated fakes toward 0.

    Returns (total BCE loss, mean prediction on reals, mean prediction on fakes).
    Uses the module-level discriminator/generator and latent/batch sizes.
    """
    def _score_batch(batch, target_value):
        # BCE of the discriminator's predictions against a constant target.
        preds = discriminator(batch)
        targets = torch.full((batch.size(0), 1), target_value, device=device)
        return F.binary_cross_entropy(preds, targets), torch.mean(preds).item()

    opt_d.zero_grad()
    # Real images should be classified as 1.
    real_loss, real_score = _score_batch(real_images, 1.0)
    # Freshly generated images should be classified as 0.
    latent = torch.randn(batch_size, latent_size, 1, 1, device=device)
    fake_loss, fake_score = _score_batch(generator(latent), 0.0)
    # Update discriminator weights on the combined loss.
    loss = real_loss + fake_loss
    loss.backward()
    opt_d.step()
    return loss.item(), real_score, fake_score
def train_generator(opt_g):
    """One generator step: produce fakes the discriminator labels as real (1).

    Returns the generator's BCE loss for this batch.
    """
    opt_g.zero_grad()
    # Sample fresh latent noise and generate a batch of fakes.
    noise = torch.randn(batch_size, latent_size, 1, 1, device=device)
    generated = generator(noise)
    # The generator wins when the discriminator outputs 1 for its fakes.
    verdicts = discriminator(generated)
    all_real = torch.ones(batch_size, 1, device=device)
    loss = F.binary_cross_entropy(verdicts, all_real)
    loss.backward()
    opt_g.step()
    return loss.item()
from torchvision.utils import save_image
# Directory where a grid of generated samples is written after every epoch.
sample_dir = 'generated'
os.makedirs(sample_dir, exist_ok=True)
def save_samples(index, latent_tensors, show=True):
    """Generate images from `latent_tensors`, save them as a PNG grid, optionally display.

    `index` is zero-padded into the filename so saved grids sort chronologically.
    """
    # no_grad: sampling is inference only — no gradients or autograd graph needed.
    with torch.no_grad():
        fake_images = generator(latent_tensors)
    fake_fname = 'generated-images-{0:0=4d}.png'.format(index)
    save_image(denorm(fake_images), os.path.join(sample_dir, fake_fname), nrow=8)
    print('Saving', fake_fname)
    if show:
        fig, ax = plt.subplots(figsize=(8, 8))
        ax.set_xticks([]); ax.set_yticks([])
        # Denormalize before display too; previously the raw [-1, 1] tensors
        # triggered matplotlib's clipping warning and rendered washed out.
        ax.imshow(make_grid(denorm(fake_images.cpu().detach()), nrow=8).permute(1, 2, 0))
# Fixed latent batch: reusing the same noise every epoch makes the saved
# sample grids directly comparable across training.
fixed_latent = torch.randn(64, latent_size, 1, 1, device=device)
save_samples(0, fixed_latent)
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
Saving generated-images-0000.png
from tqdm.notebook import tqdm
import torch.nn.functional as F
def fit(epochs, lr, start_idx=1):
    """Adversarial training loop over the module-level generator/discriminator.

    Uses Adam with betas=(0.5, 0.999) (the DCGAN recipe) for both networks,
    saves a sample grid after every epoch, and returns
    (losses_g, losses_d, real_scores, fake_scores).

    NOTE(review): the source indentation was flattened; the recording/logging
    statements are placed once per epoch (after the batch loop), consistent
    with the original "last batch" comment — confirm against the notebook.
    """
    torch.cuda.empty_cache()
    # Losses & scores — one entry per epoch, taken from that epoch's last batch.
    losses_g = []
    losses_d = []
    real_scores = []
    fake_scores = []
    # Create optimizers
    opt_d = torch.optim.Adam(discriminator.parameters(), lr=lr, betas=(0.5, 0.999))
    opt_g = torch.optim.Adam(generator.parameters(), lr=lr, betas=(0.5, 0.999))
    for epoch in range(epochs):
        for real_images, _ in tqdm(train_dl):
            # Train discriminator
            loss_d, real_score, fake_score = train_discriminator(real_images, opt_d)
            # Train generator
            loss_g = train_generator(opt_g)
        # Record losses & scores
        losses_g.append(loss_g)
        losses_d.append(loss_d)
        real_scores.append(real_score)
        fake_scores.append(fake_score)
        # Log losses & scores (last batch)
        print("Epoch [{}/{}], loss_g: {:.4f}, loss_d: {:.4f}, real_score: {:.4f}, fake_score: {:.4f}".format(
            epoch+1, epochs, loss_g, loss_d, real_score, fake_score))
        # Save generated images
        save_samples(epoch+start_idx, fixed_latent, show=False)
    return losses_g, losses_d, real_scores, fake_scores
# NOTE(review): the DCGAN paper recommends lr=0.0002; 0.001 can destabilize
# training — the loss spikes in the log below are consistent with that.
lr = 0.001 # Learning rate
epochs = 150 # Time frame
history = fit(epochs,lr) # Training the machine to generate realistic artwork
/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:481: UserWarning: This DataLoader will create 3 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary. cpuset_checked))
Epoch [1/150], loss_g: 3.9710, loss_d: 0.1516, real_score: 0.9295, fake_score: 0.0695 Saving generated-images-0001.png
Epoch [2/150], loss_g: 2.5820, loss_d: 0.7019, real_score: 0.5927, fake_score: 0.0549 Saving generated-images-0002.png
Epoch [3/150], loss_g: 1.5703, loss_d: 1.0796, real_score: 0.5785, fake_score: 0.3478 Saving generated-images-0003.png
Epoch [4/150], loss_g: 2.0581, loss_d: 1.1896, real_score: 0.6421, fake_score: 0.4836 Saving generated-images-0004.png
Epoch [5/150], loss_g: 1.8414, loss_d: 0.9864, real_score: 0.7161, fake_score: 0.4407 Saving generated-images-0005.png
Epoch [6/150], loss_g: 2.5509, loss_d: 3.4502, real_score: 0.0922, fake_score: 0.0151 Saving generated-images-0006.png
Epoch [7/150], loss_g: 1.6868, loss_d: 0.9463, real_score: 0.4764, fake_score: 0.1090 Saving generated-images-0007.png
Epoch [8/150], loss_g: 1.6411, loss_d: 0.8523, real_score: 0.5831, fake_score: 0.2242 Saving generated-images-0008.png
Epoch [9/150], loss_g: 3.4504, loss_d: 0.9672, real_score: 0.7877, fake_score: 0.4644 Saving generated-images-0009.png
Epoch [10/150], loss_g: 2.1021, loss_d: 0.8177, real_score: 0.7148, fake_score: 0.3558 Saving generated-images-0010.png
Epoch [11/150], loss_g: 1.4156, loss_d: 0.8894, real_score: 0.5131, fake_score: 0.1397 Saving generated-images-0011.png
Epoch [12/150], loss_g: 0.5518, loss_d: 1.2296, real_score: 0.5521, fake_score: 0.4022 Saving generated-images-0012.png
Epoch [13/150], loss_g: 3.4264, loss_d: 1.2914, real_score: 0.8547, fake_score: 0.6316 Saving generated-images-0013.png
Epoch [14/150], loss_g: 1.5645, loss_d: 1.3426, real_score: 0.3131, fake_score: 0.0312 Saving generated-images-0014.png
Epoch [15/150], loss_g: 2.1484, loss_d: 1.0958, real_score: 0.6633, fake_score: 0.4336 Saving generated-images-0015.png
Epoch [16/150], loss_g: 3.1294, loss_d: 0.5695, real_score: 0.7150, fake_score: 0.1606 Saving generated-images-0016.png
Epoch [17/150], loss_g: 3.0301, loss_d: 0.8872, real_score: 0.6988, fake_score: 0.3654 Saving generated-images-0017.png
Epoch [18/150], loss_g: 2.6690, loss_d: 0.5079, real_score: 0.7713, fake_score: 0.1884 Saving generated-images-0018.png
Epoch [19/150], loss_g: 2.5861, loss_d: 0.5247, real_score: 0.8650, fake_score: 0.2972 Saving generated-images-0019.png
Epoch [20/150], loss_g: 3.3735, loss_d: 1.2860, real_score: 0.8079, fake_score: 0.5946 Saving generated-images-0020.png
Epoch [21/150], loss_g: 2.3930, loss_d: 1.0228, real_score: 0.6118, fake_score: 0.3033 Saving generated-images-0021.png
Epoch [22/150], loss_g: 1.3461, loss_d: 1.2178, real_score: 0.3820, fake_score: 0.0811 Saving generated-images-0022.png
Epoch [23/150], loss_g: 3.3396, loss_d: 1.4828, real_score: 0.3121, fake_score: 0.0193 Saving generated-images-0023.png
Epoch [24/150], loss_g: 2.6522, loss_d: 0.7612, real_score: 0.7916, fake_score: 0.3663 Saving generated-images-0024.png
Epoch [25/150], loss_g: 2.2347, loss_d: 0.9153, real_score: 0.5416, fake_score: 0.1542 Saving generated-images-0025.png
Epoch [26/150], loss_g: 3.7900, loss_d: 0.7720, real_score: 0.8304, fake_score: 0.3800 Saving generated-images-0026.png
Epoch [27/150], loss_g: 2.7836, loss_d: 1.1055, real_score: 0.6948, fake_score: 0.4625 Saving generated-images-0027.png
Epoch [28/150], loss_g: 3.5630, loss_d: 0.9518, real_score: 0.7453, fake_score: 0.3991 Saving generated-images-0028.png
Epoch [29/150], loss_g: 3.4189, loss_d: 1.7818, real_score: 0.2221, fake_score: 0.0075 Saving generated-images-0029.png
Epoch [30/150], loss_g: 2.6192, loss_d: 0.7018, real_score: 0.6500, fake_score: 0.1557 Saving generated-images-0030.png
Epoch [31/150], loss_g: 2.5411, loss_d: 0.7733, real_score: 0.5826, fake_score: 0.1182 Saving generated-images-0031.png
Epoch [32/150], loss_g: 3.6792, loss_d: 0.8351, real_score: 0.6437, fake_score: 0.2354 Saving generated-images-0032.png
Epoch [33/150], loss_g: 2.3559, loss_d: 0.6220, real_score: 0.7564, fake_score: 0.2238 Saving generated-images-0033.png
Epoch [34/150], loss_g: 5.3563, loss_d: 1.1670, real_score: 0.7946, fake_score: 0.5117 Saving generated-images-0034.png
Epoch [35/150], loss_g: 3.3231, loss_d: 0.6452, real_score: 0.8347, fake_score: 0.3311 Saving generated-images-0035.png
Epoch [36/150], loss_g: 3.2674, loss_d: 1.1272, real_score: 0.4238, fake_score: 0.0196 Saving generated-images-0036.png
Epoch [37/150], loss_g: 4.4647, loss_d: 0.8216, real_score: 0.8714, fake_score: 0.4492 Saving generated-images-0037.png
Epoch [38/150], loss_g: 3.6046, loss_d: 0.5918, real_score: 0.9052, fake_score: 0.3367 Saving generated-images-0038.png
Epoch [39/150], loss_g: 1.9741, loss_d: 0.7756, real_score: 0.5893, fake_score: 0.0972 Saving generated-images-0039.png
Epoch [40/150], loss_g: 2.0916, loss_d: 0.7914, real_score: 0.5970, fake_score: 0.1463 Saving generated-images-0040.png
Epoch [41/150], loss_g: 2.0994, loss_d: 0.7151, real_score: 0.6766, fake_score: 0.1958 Saving generated-images-0041.png
Epoch [42/150], loss_g: 2.8630, loss_d: 0.5603, real_score: 0.7995, fake_score: 0.2461 Saving generated-images-0042.png
Epoch [43/150], loss_g: 1.8142, loss_d: 1.9820, real_score: 0.2091, fake_score: 0.0125 Saving generated-images-0043.png
Epoch [44/150], loss_g: 2.7452, loss_d: 0.8536, real_score: 0.5089, fake_score: 0.0242 Saving generated-images-0044.png
Epoch [45/150], loss_g: 1.8952, loss_d: 1.1249, real_score: 0.4479, fake_score: 0.0856 Saving generated-images-0045.png
Epoch [46/150], loss_g: 1.9558, loss_d: 0.6558, real_score: 0.6391, fake_score: 0.1125 Saving generated-images-0046.png
Epoch [47/150], loss_g: 2.8984, loss_d: 0.5145, real_score: 0.7933, fake_score: 0.1915 Saving generated-images-0047.png
Epoch [48/150], loss_g: 4.0753, loss_d: 0.8915, real_score: 0.8492, fake_score: 0.4539 Saving generated-images-0048.png
Epoch [49/150], loss_g: 4.2568, loss_d: 0.6710, real_score: 0.8676, fake_score: 0.3641 Saving generated-images-0049.png
Epoch [50/150], loss_g: 5.4245, loss_d: 1.3347, real_score: 0.8881, fake_score: 0.6308 Saving generated-images-0050.png
Epoch [51/150], loss_g: 4.7940, loss_d: 1.0111, real_score: 0.9635, fake_score: 0.5311 Saving generated-images-0051.png
Epoch [52/150], loss_g: 3.5937, loss_d: 0.7244, real_score: 0.6869, fake_score: 0.1988 Saving generated-images-0052.png
Epoch [53/150], loss_g: 2.1342, loss_d: 0.5298, real_score: 0.7458, fake_score: 0.1369 Saving generated-images-0053.png
Epoch [54/150], loss_g: 3.0071, loss_d: 0.4532, real_score: 0.7581, fake_score: 0.1189 Saving generated-images-0054.png
Epoch [55/150], loss_g: 4.3144, loss_d: 1.2047, real_score: 0.8006, fake_score: 0.5503 Saving generated-images-0055.png
Epoch [56/150], loss_g: 2.7617, loss_d: 0.7063, real_score: 0.7937, fake_score: 0.3215 Saving generated-images-0056.png
Epoch [57/150], loss_g: 2.2412, loss_d: 0.4561, real_score: 0.8711, fake_score: 0.2385 Saving generated-images-0057.png
Epoch [58/150], loss_g: 2.0591, loss_d: 0.8739, real_score: 0.5802, fake_score: 0.1796 Saving generated-images-0058.png
Epoch [59/150], loss_g: 0.7483, loss_d: 1.9471, real_score: 0.2240, fake_score: 0.0208 Saving generated-images-0059.png
Epoch [60/150], loss_g: 2.6274, loss_d: 0.4681, real_score: 0.8146, fake_score: 0.2002 Saving generated-images-0060.png
Epoch [61/150], loss_g: 4.2985, loss_d: 0.6820, real_score: 0.8654, fake_score: 0.3445 Saving generated-images-0061.png
Epoch [62/150], loss_g: 3.5997, loss_d: 0.7990, real_score: 0.7759, fake_score: 0.3193 Saving generated-images-0062.png
Epoch [63/150], loss_g: 3.2926, loss_d: 0.5505, real_score: 0.7484, fake_score: 0.1756 Saving generated-images-0063.png
Epoch [64/150], loss_g: 2.0621, loss_d: 1.5682, real_score: 0.4067, fake_score: 0.2365 Saving generated-images-0064.png
Epoch [65/150], loss_g: 0.8116, loss_d: 1.2361, real_score: 0.4087, fake_score: 0.0638 Saving generated-images-0065.png
Epoch [66/150], loss_g: 4.3684, loss_d: 0.6167, real_score: 0.7992, fake_score: 0.2612 Saving generated-images-0066.png
Epoch [67/150], loss_g: 2.7195, loss_d: 0.5962, real_score: 0.7107, fake_score: 0.1513 Saving generated-images-0067.png
Epoch [68/150], loss_g: 3.6808, loss_d: 0.3540, real_score: 0.8073, fake_score: 0.0881 Saving generated-images-0068.png
Epoch [69/150], loss_g: 2.7073, loss_d: 0.4998, real_score: 0.7539, fake_score: 0.1502 Saving generated-images-0069.png
Epoch [70/150], loss_g: 2.9518, loss_d: 0.3872, real_score: 0.7944, fake_score: 0.1024 Saving generated-images-0070.png
Epoch [71/150], loss_g: 2.9979, loss_d: 0.4536, real_score: 0.8096, fake_score: 0.1742 Saving generated-images-0071.png
Epoch [72/150], loss_g: 2.9753, loss_d: 0.4151, real_score: 0.7541, fake_score: 0.0711 Saving generated-images-0072.png
Epoch [73/150], loss_g: 2.0042, loss_d: 0.4619, real_score: 0.8684, fake_score: 0.2282 Saving generated-images-0073.png
Epoch [74/150], loss_g: 1.7358, loss_d: 0.6919, real_score: 0.7686, fake_score: 0.2512 Saving generated-images-0074.png
Epoch [75/150], loss_g: 3.9122, loss_d: 0.3993, real_score: 0.9497, fake_score: 0.2310 Saving generated-images-0075.png
Epoch [76/150], loss_g: 4.2377, loss_d: 0.5506, real_score: 0.8946, fake_score: 0.2940 Saving generated-images-0076.png
Epoch [77/150], loss_g: 4.0915, loss_d: 0.4942, real_score: 0.9262, fake_score: 0.2677 Saving generated-images-0077.png
Epoch [78/150], loss_g: 4.9980, loss_d: 0.1965, real_score: 0.9000, fake_score: 0.0625 Saving generated-images-0078.png
Epoch [79/150], loss_g: 4.9619, loss_d: 0.3980, real_score: 0.7253, fake_score: 0.0176 Saving generated-images-0079.png
Epoch [80/150], loss_g: 4.1260, loss_d: 1.0521, real_score: 0.6787, fake_score: 0.1658 Saving generated-images-0080.png
Epoch [81/150], loss_g: 4.5742, loss_d: 0.4208, real_score: 0.9410, fake_score: 0.2461 Saving generated-images-0081.png
Epoch [82/150], loss_g: 4.0620, loss_d: 0.4606, real_score: 0.8384, fake_score: 0.1978 Saving generated-images-0082.png
Epoch [83/150], loss_g: 5.5376, loss_d: 0.8054, real_score: 0.9322, fake_score: 0.4107 Saving generated-images-0083.png
Epoch [84/150], loss_g: 2.4910, loss_d: 0.8130, real_score: 0.5425, fake_score: 0.0390 Saving generated-images-0084.png
Epoch [85/150], loss_g: 3.4236, loss_d: 0.2230, real_score: 0.9625, fake_score: 0.1340 Saving generated-images-0085.png
Epoch [86/150], loss_g: 4.4920, loss_d: 0.5097, real_score: 0.8975, fake_score: 0.2644 Saving generated-images-0086.png
Epoch [87/150], loss_g: 2.1013, loss_d: 0.3928, real_score: 0.7571, fake_score: 0.0330 Saving generated-images-0087.png
Epoch [88/150], loss_g: 5.4154, loss_d: 1.4221, real_score: 0.9121, fake_score: 0.5819 Saving generated-images-0088.png
Epoch [89/150], loss_g: 2.7767, loss_d: 0.3396, real_score: 0.8902, fake_score: 0.1400 Saving generated-images-0089.png
Epoch [90/150], loss_g: 3.5011, loss_d: 0.2958, real_score: 0.8570, fake_score: 0.1021 Saving generated-images-0090.png
Epoch [91/150], loss_g: 4.8175, loss_d: 0.4309, real_score: 0.9368, fake_score: 0.2168 Saving generated-images-0091.png
Epoch [92/150], loss_g: 3.5436, loss_d: 0.4247, real_score: 0.8182, fake_score: 0.1353 Saving generated-images-0092.png
Epoch [93/150], loss_g: 4.5930, loss_d: 0.3704, real_score: 0.9836, fake_score: 0.2472 Saving generated-images-0093.png
Epoch [94/150], loss_g: 3.5814, loss_d: 0.3370, real_score: 0.9749, fake_score: 0.2257 Saving generated-images-0094.png
Epoch [95/150], loss_g: 4.8451, loss_d: 0.4139, real_score: 0.9644, fake_score: 0.2454 Saving generated-images-0095.png
Epoch [96/150], loss_g: 4.0887, loss_d: 0.1583, real_score: 0.9366, fake_score: 0.0744 Saving generated-images-0096.png
Epoch [97/150], loss_g: 3.0026, loss_d: 0.4557, real_score: 0.8182, fake_score: 0.1653 Saving generated-images-0097.png
Epoch [98/150], loss_g: 2.8648, loss_d: 0.5298, real_score: 0.6628, fake_score: 0.0151 Saving generated-images-0098.png
Epoch [99/150], loss_g: 4.3331, loss_d: 0.0992, real_score: 0.9576, fake_score: 0.0461 Saving generated-images-0099.png
Epoch [100/150], loss_g: 4.2944, loss_d: 0.3360, real_score: 0.7808, fake_score: 0.0193 Saving generated-images-0100.png
Epoch [101/150], loss_g: 4.7066, loss_d: 0.3924, real_score: 0.9699, fake_score: 0.2365 Saving generated-images-0101.png
Epoch [102/150], loss_g: 3.4250, loss_d: 0.2871, real_score: 0.8772, fake_score: 0.1029 Saving generated-images-0102.png
Epoch [103/150], loss_g: 4.7313, loss_d: 0.0925, real_score: 0.9905, fake_score: 0.0669 Saving generated-images-0103.png
Epoch [104/150], loss_g: 3.3955, loss_d: 0.4162, real_score: 0.8376, fake_score: 0.1515 Saving generated-images-0104.png
Epoch [105/150], loss_g: 4.5333, loss_d: 0.2383, real_score: 0.9568, fake_score: 0.1473 Saving generated-images-0105.png
Epoch [106/150], loss_g: 4.5898, loss_d: 0.1877, real_score: 0.9865, fake_score: 0.1359 Saving generated-images-0106.png
Epoch [107/150], loss_g: 4.7751, loss_d: 0.1612, real_score: 0.9332, fake_score: 0.0564 Saving generated-images-0107.png
Epoch [108/150], loss_g: 5.0860, loss_d: 0.2537, real_score: 0.9531, fake_score: 0.1490 Saving generated-images-0108.png
Epoch [109/150], loss_g: 0.7297, loss_d: 2.6766, real_score: 0.2499, fake_score: 0.0388 Saving generated-images-0109.png
Epoch [110/150], loss_g: 6.3682, loss_d: 0.4263, real_score: 0.9900, fake_score: 0.2230 Saving generated-images-0110.png
Epoch [111/150], loss_g: 5.3709, loss_d: 0.1056, real_score: 0.9410, fake_score: 0.0357 Saving generated-images-0111.png
Epoch [112/150], loss_g: 3.7062, loss_d: 0.2787, real_score: 0.8690, fake_score: 0.0815 Saving generated-images-0112.png
Epoch [113/150], loss_g: 4.2357, loss_d: 0.1618, real_score: 0.9051, fake_score: 0.0438 Saving generated-images-0113.png
Epoch [114/150], loss_g: 2.7802, loss_d: 0.3268, real_score: 0.7784, fake_score: 0.0235 Saving generated-images-0114.png
Epoch [115/150], loss_g: 6.3854, loss_d: 0.0789, real_score: 0.9658, fake_score: 0.0303 Saving generated-images-0115.png
Epoch [116/150], loss_g: 5.0244, loss_d: 0.2392, real_score: 0.9621, fake_score: 0.1543 Saving generated-images-0116.png
Epoch [117/150], loss_g: 7.1596, loss_d: 0.1959, real_score: 0.8784, fake_score: 0.0102 Saving generated-images-0117.png
Epoch [118/150], loss_g: 3.3814, loss_d: 0.2093, real_score: 0.8544, fake_score: 0.0179 Saving generated-images-0118.png
Epoch [119/150], loss_g: 4.0438, loss_d: 0.1250, real_score: 0.9515, fake_score: 0.0561 Saving generated-images-0119.png
Epoch [120/150], loss_g: 3.8221, loss_d: 0.1454, real_score: 0.8841, fake_score: 0.0071 Saving generated-images-0120.png
Epoch [121/150], loss_g: 1.6953, loss_d: 1.8816, real_score: 0.3493, fake_score: 0.0295 Saving generated-images-0121.png
Epoch [122/150], loss_g: 2.8221, loss_d: 0.3386, real_score: 0.9297, fake_score: 0.1864 Saving generated-images-0122.png
Epoch [123/150], loss_g: 3.7080, loss_d: 0.2766, real_score: 0.9498, fake_score: 0.1591 Saving generated-images-0123.png
Epoch [124/150], loss_g: 4.5504, loss_d: 0.1806, real_score: 0.9775, fake_score: 0.1238 Saving generated-images-0124.png
Epoch [125/150], loss_g: 4.9251, loss_d: 0.1806, real_score: 0.9486, fake_score: 0.0954 Saving generated-images-0125.png
Epoch [126/150], loss_g: 5.0063, loss_d: 0.0570, real_score: 0.9885, fake_score: 0.0408 Saving generated-images-0126.png
Epoch [127/150], loss_g: 5.7151, loss_d: 0.2717, real_score: 0.8518, fake_score: 0.0479 Saving generated-images-0127.png
Epoch [128/150], loss_g: 5.3457, loss_d: 0.0771, real_score: 0.9535, fake_score: 0.0243 Saving generated-images-0128.png
Epoch [129/150], loss_g: 2.9439, loss_d: 0.3533, real_score: 0.7622, fake_score: 0.0112 Saving generated-images-0129.png
Epoch [130/150], loss_g: 0.3393, loss_d: 1.0059, real_score: 0.5228, fake_score: 0.0054 Saving generated-images-0130.png
Epoch [131/150], loss_g: 4.9004, loss_d: 0.0813, real_score: 0.9789, fake_score: 0.0389 Saving generated-images-0131.png
Epoch [132/150], loss_g: 5.1014, loss_d: 0.0769, real_score: 0.9733, fake_score: 0.0449 Saving generated-images-0132.png
Epoch [133/150], loss_g: 6.1455, loss_d: 0.0681, real_score: 0.9611, fake_score: 0.0248 Saving generated-images-0133.png
Epoch [134/150], loss_g: 4.4771, loss_d: 0.1317, real_score: 0.9509, fake_score: 0.0556 Saving generated-images-0134.png
Epoch [135/150], loss_g: 4.1678, loss_d: 0.1706, real_score: 0.9520, fake_score: 0.0907 Saving generated-images-0135.png
Epoch [136/150], loss_g: 3.6149, loss_d: 0.4515, real_score: 0.7224, fake_score: 0.0015 Saving generated-images-0136.png
Epoch [137/150], loss_g: 2.6876, loss_d: 0.2118, real_score: 0.8509, fake_score: 0.0223 Saving generated-images-0137.png
Epoch [138/150], loss_g: 10.2805, loss_d: 6.1026, real_score: 0.9609, fake_score: 0.7295 Saving generated-images-0138.png
Epoch [139/150], loss_g: 4.5513, loss_d: 0.3021, real_score: 0.8852, fake_score: 0.0935 Saving generated-images-0139.png
Epoch [140/150], loss_g: 5.4818, loss_d: 0.1369, real_score: 0.9112, fake_score: 0.0289 Saving generated-images-0140.png
Epoch [141/150], loss_g: 4.7548, loss_d: 0.2269, real_score: 0.9289, fake_score: 0.0960 Saving generated-images-0141.png
Epoch [142/150], loss_g: 3.7223, loss_d: 0.1509, real_score: 0.9177, fake_score: 0.0494 Saving generated-images-0142.png
Epoch [143/150], loss_g: 4.8716, loss_d: 0.1700, real_score: 0.9151, fake_score: 0.0405 Saving generated-images-0143.png
Epoch [144/150], loss_g: 7.7788, loss_d: 0.1527, real_score: 0.9968, fake_score: 0.1165 Saving generated-images-0144.png
Epoch [145/150], loss_g: 5.7901, loss_d: 0.1180, real_score: 0.9495, fake_score: 0.0513 Saving generated-images-0145.png
Epoch [146/150], loss_g: 5.1051, loss_d: 0.1425, real_score: 0.9097, fake_score: 0.0239 Saving generated-images-0146.png
Epoch [147/150], loss_g: 6.9828, loss_d: 0.0392, real_score: 0.9871, fake_score: 0.0228 Saving generated-images-0147.png
Epoch [148/150], loss_g: 5.5712, loss_d: 0.8249, real_score: 0.8907, fake_score: 0.3209 Saving generated-images-0148.png
Epoch [149/150], loss_g: 5.6367, loss_d: 0.0884, real_score: 0.9779, fake_score: 0.0495 Saving generated-images-0149.png
Epoch [150/150], loss_g: 5.5613, loss_d: 0.0838, real_score: 0.9422, fake_score: 0.0151 Saving generated-images-0150.png
from IPython.display import Image, display # Now we check machine's output from start to end of the training
# display() each grid explicitly: when these lines share one cell (or run as a
# script), only the final bare Image(...) expression would otherwise render.
# The last epoch (150) is the final generated batch of artworks.
for _epoch in (13, 37, 72, 103, 138, 150):
    display(Image('./generated/generated-images-{0:0=4d}.png'.format(_epoch)))
# Unpack the per-epoch training history returned by fit().
losses_g, losses_d, real_scores, fake_scores = history
# Save the model checkpoints
torch.save(generator.state_dict(), 'G.ckpt')
torch.save(discriminator.state_dict(), 'D.ckpt')
# Loss curves for both networks over training.
plt.plot(losses_d, '-')
plt.plot(losses_g, '-')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend(['Discriminator', 'Generator'])
plt.title('Losses');
# Mean discriminator scores: ideally 'Real' stays high and 'Fake' stays low.
plt.plot(real_scores, '-')
plt.plot(fake_scores, '-')
plt.xlabel('epoch')
plt.ylabel('score')
plt.legend(['Real', 'Fake'])
plt.title('Scores');